Funding Successful Projects - Hackerearth Contest

Link: Funding Successful Projects

Author: Sethu Iyer


In [1]:
import pandas as pd
import numpy as np
import datetime
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
import xgboost as xgb



In [2]:
pd.set_option('display.max_colwidth', 100)

# Load the competition data
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

Creating features from datetime


In [12]:
# First, convert the Unix timestamps to datetime objects
unix_cols = ['deadline', 'state_changed_at', 'launched_at', 'created_at']

for col in unix_cols:
    train[col] = train[col].apply(lambda ts: datetime.datetime.fromtimestamp(int(ts)))
    test[col] = test[col].apply(lambda ts: datetime.datetime.fromtimestamp(int(ts)))
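
As an aside, the same conversion can be done without a row-wise apply. A minimal vectorized sketch, assuming the columns hold Unix epoch seconds as above (note that pd.to_datetime returns UTC timestamps, while datetime.fromtimestamp uses local time, so the two can differ by the UTC offset):

# Vectorized alternative to the apply-based conversion above
for col in unix_cols:
    train[col] = pd.to_datetime(train[col], unit='s')
    test[col] = pd.to_datetime(test[col], unit='s')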

In [15]:
# Log of the time difference (in seconds) between 1) launched_at and created_at...
train['launch_create'] = train.apply(lambda row: np.log((row['launched_at'] - row['created_at']).total_seconds()), axis=1)
test['launch_create'] = test.apply(lambda row: np.log((row['launched_at'] - row['created_at']).total_seconds()), axis=1)

In [16]:
# ...and 2) between deadline and launched_at
train['deadline_launch'] = train.apply(lambda row: np.log((row['deadline'] - row['launched_at']).total_seconds()), axis=1)
test['deadline_launch'] = test.apply(lambda row: np.log((row['deadline'] - row['launched_at']).total_seconds()), axis=1)
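
One caveat: if a project was launched the instant it was created, total_seconds() is 0 and np.log returns -inf. A vectorized sketch with a guard (the one-second floor is an assumption, not part of the original solution):

# Clip durations to at least 1 second so np.log never sees zero
secs = (train['launched_at'] - train['created_at']).dt.total_seconds()
train['launch_create'] = np.log(secs.clip(lower=1))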

Now, normalizing the goal amount across currencies


In [17]:
# All currencies that appear in train and test
total_currency = train['currency'].append(test['currency'])
print(pd.unique(total_currency))


['USD' 'GBP' 'CAD' 'AUD' 'NZD' 'EUR' 'SEK' 'NOK' 'DKK' 'CHF' 'HKD' 'SGD'
 'MXN']

In [18]:
# Approximate USD conversion rates at the time of the contest
conversion_factor = {'USD': 1.00,
                     'GBP': 1.28,
                     'CAD': 0.75,
                     'AUD': 0.76,
                     'NZD': 0.73,
                     'EUR': 1.12,
                     'SEK': 0.11,
                     'NOK': 0.12,
                     'DKK': 0.15,
                     'CHF': 1.03,
                     'HKD': 0.13,
                     'SGD': 0.72,
                     'MXN': 0.056}

# Convert every goal to a common USD scale
train['goal'] = train.apply(lambda row: row['goal'] * conversion_factor[row['currency']], axis=1)
test['goal'] = test.apply(lambda row: row['goal'] * conversion_factor[row['currency']], axis=1)
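
A quick sanity check on the conversion: after rescaling, the median goal per original currency should sit on a comparable USD scale (this is just a spot check, not part of the pipeline):

# Median converted goal, grouped by the original currency
print(train.groupby('currency')['goal'].median())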

Now, creating some text features


In [19]:
# Word counts for the project name and description
train['name_count'] = train['name'].str.split().str.len()
train['desc_count'] = train['desc'].str.split().str.len()

test['name_count'] = test['name'].str.split().str.len()
test['desc_count'] = test['desc'].str.split().str.len()

# Character length and keyword count of the hyphen-separated keywords slug
train['keywords_len'] = train['keywords'].str.len()
train['keywords_count'] = train['keywords'].str.split('-').str.len()

test['keywords_len'] = test['keywords'].str.len()
test['keywords_count'] = test['keywords'].str.split('-').str.len()
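
For intuition: keywords is a hyphen-separated slug, so splitting on '-' counts the individual keywords. A tiny worked example (the slug value itself is hypothetical):

slug = 'drawing-for-dollars'        # hypothetical keywords value
print(len(slug))                    # keywords_len   -> 19
print(len(slug.split('-')))         # keywords_count -> 3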

Creating more complex text features


In [24]:
import re

def desc_clean(word):
    # Replace runs of non-word characters, digits, and whitespace with a single space
    p1 = re.sub(pattern=r'(\W+)|(\d+)|(\s+)', repl=' ', string=word)
    return p1.lower()

kickdesc = pd.Series(train['desc'].tolist() + test['desc'].tolist()).astype(str)
kickdesc = kickdesc.map(desc_clean)

In [25]:
stop = set(stopwords.words('english'))
stemmer = SnowballStemmer(language='english')

# Remove stopwords, stem each word, then drop very short tokens
kickdesc = [[word for word in doc.split() if word not in stop] for doc in kickdesc]
kickdesc = [[stemmer.stem(word) for word in doc] for doc in kickdesc]
kickdesc = [[word for word in doc if len(word) > 2] for doc in kickdesc]
kickdesc = [' '.join(doc) for doc in kickdesc]
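
To see what the pipeline does end to end, here is one hypothetical description pushed through the same steps:

sample = desc_clean('Fund our 2nd album! We need $5,000.')
sample = [stemmer.stem(w) for w in sample.split() if w not in stop]
sample = [w for w in sample if len(w) > 2]
print(' '.join(sample))  # -> 'fund album need'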

In [26]:
# Bag-of-words over the cleaned descriptions, capped at the 300 most frequent terms
cv = CountVectorizer(max_features=300)
combine = pd.DataFrame(cv.fit_transform(kickdesc).todense())
combine.rename(columns=lambda x: 'variable_' + str(x), inplace=True)
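
.todense() materializes the full dense matrix; at 300 columns that is affordable here, but a sparse-aware DataFrame avoids the copy. A sketch assuming a newer pandas (>= 0.25) than the one this notebook ran on:

# Sparse alternative that skips the dense copy
combine = pd.DataFrame.sparse.from_spmatrix(
    cv.fit_transform(kickdesc),
    columns=['variable_' + str(i) for i in range(300)])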

In [27]:
# Split the combined bag-of-words matrix back into train and test parts
train_text = combine[:train.shape[0]]
test_text = combine[train.shape[0]:]
test_text.reset_index(drop=True, inplace=True)

Creating some more text features


In [30]:
# Character lengths of the name and description
for feat, col in zip(['name_len', 'desc_len'], ['name', 'desc']):
    train[feat] = train[col].apply(str).apply(len)
    test[feat] = test[col].apply(str).apply(len)

Finalizing the training and testing data


In [31]:
cols_to_use = ['name_len', 'desc_len', 'keywords_len',
               'name_count', 'desc_count', 'keywords_count',
               'goal', 'launch_create', 'deadline_launch']

target = train['final_status']
train = train[cols_to_use]
test = test[cols_to_use]

In [32]:
# Combine the handcrafted features with the bag-of-words features
X_train = pd.concat([train, train_text], axis=1)
X_test = pd.concat([test, test_text], axis=1)

It's training time!


In [33]:
dtrain = xgb.DMatrix(data=X_train, label = target)
dtest = xgb.DMatrix(data=X_test)

In [34]:
params = {
    'objective': 'binary:logistic',
    'eval_metric': 'error',
    'eta': 0.025,
    'max_depth': 6,
    'subsample': 0.7,
    'colsample_bytree': 0.7,
    'min_child_weight': 5
}

In [36]:
bst = xgb.cv(params, dtrain, num_boost_round=1000, early_stopping_rounds=40, nfold=5, verbose_eval=10)


[0]	train-error:0.315239+0.000786469	test-error:0.317595+0.00302803
[10]	train-error:0.311748+0.00140474	test-error:0.314747+0.00238244
[20]	train-error:0.309938+0.00135817	test-error:0.313461+0.00275844
[30]	train-error:0.307896+0.000938205	test-error:0.312102+0.00199129
[40]	train-error:0.306194+0.000681755	test-error:0.310936+0.00222002
[50]	train-error:0.304435+0.000868213	test-error:0.309346+0.00200397
[60]	train-error:0.303325+0.000811598	test-error:0.308449+0.0023855
[70]	train-error:0.30157+0.000816076	test-error:0.307403+0.00218977
[80]	train-error:0.299995+0.00073359	test-error:0.306146+0.0022786
[90]	train-error:0.298793+0.000781755	test-error:0.305239+0.00214781
[100]	train-error:0.297748+0.000790003	test-error:0.304222+0.0021566
[110]	train-error:0.296742+0.000669909	test-error:0.303723+0.00163475
[120]	train-error:0.295817+0.000630149	test-error:0.302992+0.00177278
[130]	train-error:0.295008+0.000650693	test-error:0.30203+0.0018652
[140]	train-error:0.293944+0.000617596	test-error:0.301623+0.00225772
[150]	train-error:0.293279+0.000589511	test-error:0.300966+0.00232957
[160]	train-error:0.292374+0.000423867	test-error:0.300587+0.00254873
[170]	train-error:0.291894+0.000493364	test-error:0.300042+0.00259174
[180]	train-error:0.291142+0.000447689	test-error:0.29957+0.00263728
[190]	train-error:0.290456+0.000465127	test-error:0.299209+0.00245562
[200]	train-error:0.289926+0.000300983	test-error:0.29871+0.0026168
[210]	train-error:0.289249+0.000272225	test-error:0.298136+0.00278628
[220]	train-error:0.288691+0.00037546	test-error:0.297942+0.00248626
[230]	train-error:0.287933+0.000373337	test-error:0.297554+0.00259198
[240]	train-error:0.287258+0.000368768	test-error:0.297332+0.00230263
[250]	train-error:0.286566+0.000453324	test-error:0.297119+0.0021935
[260]	train-error:0.286143+0.000480822	test-error:0.296694+0.00203692
[270]	train-error:0.285649+0.000504702	test-error:0.296675+0.00216408
[280]	train-error:0.285225+0.000602094	test-error:0.296213+0.00203117
[290]	train-error:0.284654+0.000602347	test-error:0.296241+0.00219619
[300]	train-error:0.284285+0.000443388	test-error:0.295824+0.00209794
[310]	train-error:0.283644+0.000525295	test-error:0.2955+0.00209633
[320]	train-error:0.283343+0.00061536	test-error:0.295473+0.00205584
[330]	train-error:0.282851+0.000556045	test-error:0.295223+0.00187194
[340]	train-error:0.282252+0.000410802	test-error:0.294927+0.00194406
[350]	train-error:0.281833+0.000497332	test-error:0.294844+0.00182909
[360]	train-error:0.281343+0.000494241	test-error:0.294622+0.00181612
[370]	train-error:0.28074+0.000372459	test-error:0.294086+0.00165167
[380]	train-error:0.280391+0.000248586	test-error:0.294169+0.00179752
[390]	train-error:0.279995+0.000209866	test-error:0.293753+0.00162398
[400]	train-error:0.27966+0.000224273	test-error:0.293956+0.00163293
[410]	train-error:0.279237+0.000240321	test-error:0.293965+0.00159737
[420]	train-error:0.278749+0.000232209	test-error:0.293808+0.00153236
[430]	train-error:0.278423+0.000165318	test-error:0.293734+0.00154197
[440]	train-error:0.278044+0.000227689	test-error:0.293762+0.00170293
[450]	train-error:0.277373+0.000324512	test-error:0.293595+0.00169109
[460]	train-error:0.277043+0.000220998	test-error:0.293382+0.00160698
[470]	train-error:0.276596+0.000193554	test-error:0.293207+0.00150978
[480]	train-error:0.276166+0.000321578	test-error:0.293281+0.00183186
[490]	train-error:0.275623+0.000378994	test-error:0.293244+0.00189205
[500]	train-error:0.275373+0.000468219	test-error:0.293373+0.00183609
[510]	train-error:0.274955+0.000574439	test-error:0.292929+0.002033
[520]	train-error:0.27462+0.000505756	test-error:0.292828+0.00187148
[530]	train-error:0.274245+0.000457024	test-error:0.292883+0.00206126
[540]	train-error:0.273769+0.000562616	test-error:0.292587+0.00204954
[550]	train-error:0.27341+0.000645683	test-error:0.292476+0.0023613
[560]	train-error:0.272994+0.000683077	test-error:0.292264+0.00213156
[570]	train-error:0.272634+0.000659416	test-error:0.292153+0.00202745
[580]	train-error:0.272199+0.000670849	test-error:0.292088+0.00197809
[590]	train-error:0.271852+0.000579992	test-error:0.291995+0.00203614
[600]	train-error:0.271526+0.000612456	test-error:0.292032+0.00216537
[610]	train-error:0.271269+0.000579962	test-error:0.292162+0.00202512
[620]	train-error:0.270953+0.000581782	test-error:0.292014+0.00201798
[630]	train-error:0.270467+0.000547779	test-error:0.291792+0.00199251
[640]	train-error:0.270137+0.00056061	test-error:0.291986+0.00178127
[650]	train-error:0.269785+0.000678619	test-error:0.291755+0.00191478
[660]	train-error:0.269327+0.000568353	test-error:0.291533+0.0018293
[670]	train-error:0.269011+0.000620289	test-error:0.291385+0.00178354
[680]	train-error:0.268751+0.000645131	test-error:0.291468+0.00196489
[690]	train-error:0.268372+0.000619726	test-error:0.291366+0.0021829
[700]	train-error:0.26797+0.000702943	test-error:0.29132+0.00220548
[710]	train-error:0.267681+0.000622537	test-error:0.291071+0.00196498
[720]	train-error:0.267288+0.000706143	test-error:0.291043+0.00198403
[730]	train-error:0.266983+0.000645231	test-error:0.29108+0.00184812
[740]	train-error:0.266691+0.000735194	test-error:0.290978+0.00203883
[750]	train-error:0.266361+0.000731969	test-error:0.290904+0.00195183
[760]	train-error:0.266187+0.000713048	test-error:0.29096+0.00215193
[770]	train-error:0.265942+0.000714857	test-error:0.290756+0.00204698
[780]	train-error:0.265658+0.000709981	test-error:0.290812+0.0020717
[790]	train-error:0.265258+0.000706291	test-error:0.290589+0.00210501
[800]	train-error:0.26489+0.000656897	test-error:0.290673+0.00190387
[810]	train-error:0.264597+0.000732581	test-error:0.290571+0.00213222
[820]	train-error:0.264342+0.000735241	test-error:0.290479+0.00237082
[830]	train-error:0.263965+0.0006519	test-error:0.290516+0.00227529
[840]	train-error:0.263723+0.000711161	test-error:0.290442+0.00216522
[850]	train-error:0.263334+0.000585349	test-error:0.290238+0.00216131
[860]	train-error:0.263091+0.000526822	test-error:0.290146+0.00202669
[870]	train-error:0.262673+0.000504867	test-error:0.290275+0.00195305
[880]	train-error:0.262458+0.000515963	test-error:0.290062+0.00188352
[890]	train-error:0.262222+0.000517274	test-error:0.29022+0.00211965
[900]	train-error:0.261864+0.000690435	test-error:0.290007+0.00197233
[910]	train-error:0.261642+0.000621772	test-error:0.289924+0.00190545
[920]	train-error:0.261313+0.000672809	test-error:0.289951+0.00191195
[930]	train-error:0.261066+0.000712011	test-error:0.289887+0.00181031
[940]	train-error:0.260844+0.000661408	test-error:0.290127+0.00188669
[950]	train-error:0.260627+0.00062046	test-error:0.289868+0.00186479
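
xgb.cv returns a DataFrame with the per-round mean and standard deviation of each metric, so instead of eyeballing the log, the best round can be read off directly; a minimal sketch:

# Boosting round with the lowest mean CV test error
best_round = bst['test-error-mean'].idxmin()
print(best_round, bst['test-error-mean'].min())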

In [39]:
# Train on the full training set, with the round count guided by the CV run above
bst_train = xgb.train(params, dtrain, num_boost_round=900)
p_test = bst_train.predict(dtest)

In [38]:
sub = pd.DataFrame()
test = pd.read_csv('test.csv')  # reload to recover the project_id column dropped earlier
sub['project_id'] = test['project_id']
sub['final_status'] = p_test
sub['final_status'] = [1 if x > 0.5 else 0 for x in sub['final_status']]
sub.to_csv("xgb_with_python_feats.csv", index=False)  # 70.60 on the leaderboard